Add debug_op hypercall.
Add many debug points.
Signed-off-by: Tristan Gingold <tgingold@free.fr>
data8 do_ni_hypercall /* */
data8 do_dom0vp_op /* dom0vp_op */
data8 do_pirq_guest_eoi /* arch_1 */
- data8 do_ni_hypercall /* arch_2 */ /* 50 */
+ data8 do_ia64_debug_op /* arch_2 */ /* 50 */
data8 do_ni_hypercall /* arch_3 */
data8 do_ni_hypercall /* arch_4 */
data8 do_ni_hypercall /* arch_5 */
#include <asm/vmx_vcpu.h>
#include <asm/vmx_mm_def.h>
#include <asm/vmx_pal_vsa.h>
+#include <asm/debugger.h>
/* SDM vol2 5.5 - IVA based interruption handling */
#define INITIAL_PSR_VALUE_AT_INTERRUPTION 0x0000001808028034
viva = vmx_vcpu_get_iva(vcpu);
regs->cr_iip = viva + vec;
+
+ debugger_event(vec == IA64_EXTINT_VECTOR ?
+ XEN_IA64_DEBUG_ON_EXTINT : XEN_IA64_DEBUG_ON_EXCEPT);
}
#include <xen/sched.h>
#include <asm/pgtable.h>
#include <asm/vmmu.h>
+#include <asm/debugger.h>
static const int valid_mm_mode[8] = {
GUEST_PHYS, /* (it, dt, rt) -> (0, 0, 0) */
(old_psr.rt != new_psr.rt )
) {
switch_mm_mode (vcpu, old_psr, new_psr);
+ debugger_event(XEN_IA64_DEBUG_ON_MMU);
}
return;
if (FP_PSR(vcpu) & IA64_PSR_DFH)
regs->cr_ipsr |= IA64_PSR_DFH;
+ if (unlikely(vcpu->domain->debugger_attached)) {
+ if (vcpu->domain->arch.debug_flags & XEN_IA64_DEBUG_FORCE_SS)
+ regs->cr_ipsr |= IA64_PSR_SS;
+ if (vcpu->domain->arch.debug_flags & XEN_IA64_DEBUG_FORCE_DB)
+ regs->cr_ipsr |= IA64_PSR_DB;
+ }
+
check_mm_mode_switch(vcpu, old_psr, new_psr);
return ;
}
#include <asm/vmx.h>
#include <asm/virt_event.h>
#include <asm/vmx_phy_mode.h>
+#include <asm/debugger.h>
#ifdef BYPASS_VMAL_OPCODE
static void
}
#endif // CHECK_FAULT
+ if (debugger_event(XEN_IA64_DEBUG_ON_RFI)) {
+ raise_softirq(SCHEDULE_SOFTIRQ);
+ do_softirq();
+ }
+
regs=vcpu_regs(vcpu);
vpsr.val=regs->cr_ipsr;
if ( vpsr.is == 1 ) {
}
#endif // VMAL_NO_FAULT_CHECK
+ debugger_event(XEN_IA64_DEBUG_ON_TC);
+
return vmx_vcpu_ptc_l(vcpu,r3,bits(r2,2,7));
}
}
#endif // VMAL_NO_FAULT_CHECK
+ debugger_event(XEN_IA64_DEBUG_ON_TC);
+
return vmx_vcpu_ptc_g(vcpu,r3,bits(r2,2,7));
}
}
#endif // VMAL_NO_FAULT_CHECK
+ debugger_event(XEN_IA64_DEBUG_ON_TC);
+
return vmx_vcpu_ptc_ga(vcpu,r3,bits(r2,2,7));
}
u64 r2,r3;
if ( ptr_fault_check(vcpu, inst, &r2, &r3 ) == IA64_FAULT )
return IA64_FAULT;
+
+ debugger_event(XEN_IA64_DEBUG_ON_TR);
+
return vmx_vcpu_ptr_d(vcpu,r3,bits(r2,2,7));
}
u64 r2,r3;
if ( ptr_fault_check(vcpu, inst, &r2, &r3 ) == IA64_FAULT )
return IA64_FAULT;
+
+ debugger_event(XEN_IA64_DEBUG_ON_TR);
+
return vmx_vcpu_ptr_i(vcpu,r3,bits(r2,2,7));
}
return IA64_FAULT;
}
+ debugger_event(XEN_IA64_DEBUG_ON_TR);
+
return (vmx_vcpu_itr_d(vcpu, slot, pte, itir, ifa));
}
return IA64_FAULT;
}
+ debugger_event(XEN_IA64_DEBUG_ON_TR);
+
return vmx_vcpu_itr_i(vcpu, slot, pte, itir, ifa);
}
return IA64_FAULT;
}
+ debugger_event(XEN_IA64_DEBUG_ON_TC);
+
return vmx_vcpu_itc_d(vcpu, pte, itir, ifa);
}
return IA64_FAULT;
}
+ debugger_event(XEN_IA64_DEBUG_ON_TC);
+
return vmx_vcpu_itc_i(vcpu, pte, itir, ifa);
}
#else
inst.inst=opcode;
#endif /* BYPASS_VMAL_OPCODE */
+
+ debugger_event(XEN_IA64_DEBUG_ON_PRIVOP);
+
/*
* Switch to actual virtual rid in rr0 and rr4,
* which is required by some tlb related instructions.
PSCB(v, interrupt_collection_enabled) = 0;
perfc_incra(slow_reflect, vector >> 8);
+
+ debugger_event(vector == IA64_EXTINT_VECTOR ?
+ XEN_IA64_DEBUG_ON_EXTINT : XEN_IA64_DEBUG_ON_EXCEPT);
}
void reflect_event(void)
PSCB(v, vpsr_dfh) = 0;
v->vcpu_info->evtchn_upcall_mask = 1;
PSCB(v, interrupt_collection_enabled) = 0;
+
+ debugger_event(XEN_IA64_DEBUG_ON_EVENT);
}
static int handle_lazy_cover(struct vcpu *v, struct pt_regs *regs)
break;
case 29:
vector = IA64_DEBUG_VECTOR;
- if (debugger_trap_entry(vector,regs))
+ if (debugger_kernel_event(regs, XEN_IA64_DEBUG_ON_KERN_DEBUG))
return;
break;
case 30:
break;
case 35:
vector = IA64_TAKEN_BRANCH_TRAP_VECTOR;
- if (debugger_trap_entry(vector,regs))
+ if (debugger_kernel_event(regs, XEN_IA64_DEBUG_ON_KERN_TBRANCH))
return;
break;
case 36:
vector = IA64_SINGLE_STEP_TRAP_VECTOR;
- if (debugger_trap_entry(vector,regs))
+ if (debugger_kernel_event(regs, XEN_IA64_DEBUG_ON_KERN_SSTEP))
return;
break;
#include <xen/hypercall.h>
#include <xen/softirq.h>
#include <xen/time.h>
+#include <asm/debugger.h>
static DEFINE_SPINLOCK(efi_time_services_lock);
long r11 = 0;
long status;
+ debugger_event(XEN_IA64_DEBUG_ON_SAL);
+
status = 0;
switch (index) {
case SAL_FREQ_BASE:
if (unlikely(running_on_sim))
return pal_emulator_static(index);
+ debugger_event(XEN_IA64_DEBUG_ON_PAL);
+
// pal code must be mapped by a TR when pal is called, however
// calls are rare enough that we will map it lazily rather than
// at every context switch
struct vcpu *v = current;
efi_status_t status;
+ debugger_event(XEN_IA64_DEBUG_ON_EFI);
+
*fault = IA64_NO_FAULT;
switch (regs->r2) {
return ret;
}
+
+/*
+ * Hypercall entry point for guest debugging control
+ * (__HYPERVISOR_ia64_debug_op, arch_2 slot).
+ *
+ * cmd:        XEN_IA64_DEBUG_OP_* sub-command.
+ * domain:     id of the target domain.
+ * u_debug_op: guest handle to a xen_ia64_debug_op_t argument block.
+ *
+ * Returns 0 on success or a negative errno value (-EPERM, -EFAULT,
+ * -ESRCH, -ENOSYS) cast to unsigned long, per hypercall convention.
+ */
+unsigned long
+do_ia64_debug_op(unsigned long cmd, unsigned long domain,
+                 XEN_GUEST_HANDLE(xen_ia64_debug_op_t) u_debug_op)
+{
+    xen_ia64_debug_op_t curop, *op = &curop;
+    struct domain *d;
+    long ret = 0;
+
+    /* Only a privileged (control) domain may drive the debugger. */
+    if (!IS_PRIV(current->domain))
+        return -EPERM;
+    /* Argument block is copied in unconditionally, even for GET,
+       to keep the sub-command paths uniform. */
+    if (copy_from_guest(op, u_debug_op, 1))
+        return -EFAULT;
+    /* Take an RCU reference on the target; released below. */
+    d = rcu_lock_domain_by_id(domain);
+    if (d == NULL)
+        return -ESRCH;
+
+    switch (cmd) {
+    case XEN_IA64_DEBUG_OP_SET_FLAGS:
+        /* NOTE(review): op->flags is stored without masking against the
+           known XEN_IA64_DEBUG_* bits — unknown bits are silently kept. */
+        d->arch.debug_flags = op->flags;
+        break;
+    case XEN_IA64_DEBUG_OP_GET_FLAGS:
+        op->flags = d->arch.debug_flags;
+        if (copy_to_guest(u_debug_op, op, 1))
+            ret = -EFAULT;
+        break;
+    default:
+        ret = -ENOSYS;
+    }
+    rcu_unlock_domain(d);
+    return ret;
+}
#include <asm/dom_fw_common.h>
#include <public/memory.h>
#include <asm/event.h>
+#include <asm/debugger.h>
static void domain_page_flush_and_put(struct domain* d, unsigned long mpaddr,
volatile pte_t* ptep, pte_t old_pte,
d->domain_id, mpaddr, d->arch.convmem_end);
}
+ debugger_event (XEN_IA64_DEBUG_ON_BAD_MPA);
+
if (entry != NULL)
p2m_entry_set(entry, NULL, __pte(0));
//XXX This is a work around until the emulation memory access to a region
} else if (ia64_get_cpl(ipsr) > CONFIG_CPL0_EMUL)
return IA64_ILLOP_FAULT;
+ debugger_event(XEN_IA64_DEBUG_ON_PRIVOP);
+
switch (slot_type) {
case M:
if (inst.generic.major == 0) {
return 1;
}
perfc_incra(slow_hyperprivop, iim);
+
+ debugger_event(XEN_IA64_DEBUG_ON_PRIVOP);
+
switch (iim) {
case HYPERPRIVOP_RFI:
vcpu_rfi(v);
#define debugger_trap_immediate() ((void)0)
#endif
-static inline int debugger_trap_entry(
-    unsigned int vector, struct cpu_user_regs *regs)
+/*
+ * Report EVENT (one XEN_IA64_DEBUG_ON_* bit) to an attached debugger.
+ * Pauses the current domain for the debugger when a debugger is
+ * attached and the event bit is enabled in arch.debug_flags.
+ * Returns 1 when the domain was paused, 0 otherwise.
+ */
+static inline int debugger_event(unsigned long event)
{
struct vcpu *v = current;
+    struct domain *d = v->domain;
-    if (guest_kernel_mode(regs) && v->domain->debugger_attached) {
+    if (unlikely (d->debugger_attached && (d->arch.debug_flags & event))) {
+        /* Record why we stopped so the tools side can query it. */
+        d->arch.debug_event = event;
domain_pause_for_debugger();
return 1;
}
+    return 0;
+}
+/*
+ * Like debugger_event(), but additionally requires that the guest was
+ * executing in kernel mode (judged from REGS).  Used for the kernel
+ * single-step / breakpoint / taken-branch trap vectors.
+ * Returns 1 when the domain was paused for the debugger, 0 otherwise.
+ */
+static inline int debugger_kernel_event(
+    struct cpu_user_regs *regs, unsigned long event)
+{
+    struct vcpu *v = current;
+    struct domain *d = v->domain;
+
+    if (unlikely(d->debugger_attached && (d->arch.debug_flags & event)
+                 && guest_kernel_mode(regs))) {
+        /* Record why we stopped so the tools side can query it. */
+        d->arch.debug_event = event;
+        domain_pause_for_debugger();
+        return 1;
+    }
return 0;
}
struct opt_feature opt_feature;
+ /* Debugging flags. See arch-ia64.h for bits definition. */
+ unsigned int debug_flags;
+
+ /* Reason of debugging break. */
+ unsigned int debug_event;
+
#ifdef CONFIG_XEN_IA64_TLB_TRACK
struct tlb_track* tlb_track;
#endif
extern int ia64_hypercall (struct pt_regs *regs);
extern void vmx_save_state(struct vcpu *v);
extern void vmx_load_state(struct vcpu *v);
-extern void show_registers(struct pt_regs *regs);
-#define show_execution_state show_registers
extern unsigned long __gpfn_to_mfn_foreign(struct domain *d, unsigned long gpfn);
extern void sync_split_caches(void);
extern void set_privileged_operation_isr (struct vcpu *vcpu,int inst);
unsigned long vadr;
unsigned long rid;
};
+typedef struct ia64_tr_entry ia64_tr_entry_t;
+DEFINE_XEN_GUEST_HANDLE(ia64_tr_entry_t);
struct vcpu_tr_regs {
struct ia64_tr_entry itrs[8];
/* Fast and light hypercalls. */
#define __HYPERVISOR_ia64_fast_eoi __HYPERVISOR_arch_1
+/* Extra debug features. */
+#define __HYPERVISOR_ia64_debug_op __HYPERVISOR_arch_2
+
+/* Set/Get extra conditions to break. */
+#define XEN_IA64_DEBUG_OP_SET_FLAGS 1
+#define XEN_IA64_DEBUG_OP_GET_FLAGS 2
+
+/* Break on kernel single step. */
+#define XEN_IA64_DEBUG_ON_KERN_SSTEP (1 << 0)
+
+/* Break on kernel debug (breakpoint or watch point). */
+#define XEN_IA64_DEBUG_ON_KERN_DEBUG (1 << 1)
+
+/* Break on kernel taken branch. */
+#define XEN_IA64_DEBUG_ON_KERN_TBRANCH (1 << 2)
+
+/* Break on external interrupt injection. */
+#define XEN_IA64_DEBUG_ON_EXTINT (1 << 3)
+
+/* Break on exception injection. */
+#define XEN_IA64_DEBUG_ON_EXCEPT (1 << 4)
+
+/* Break on event injection. */
+#define XEN_IA64_DEBUG_ON_EVENT (1 << 5)
+
+/* Break on privop/virtualized instruction (slow path only). */
+#define XEN_IA64_DEBUG_ON_PRIVOP (1 << 6)
+
+/* Break on emulated PAL call (at entry). */
+#define XEN_IA64_DEBUG_ON_PAL (1 << 7)
+
+/* Break on emulated SAL call (at entry). */
+#define XEN_IA64_DEBUG_ON_SAL (1 << 8)
+
+/* Break on emulated EFI call (at entry). */
+#define XEN_IA64_DEBUG_ON_EFI (1 << 9)
+
+/* Break on rfi emulation (slow path only, before exec). */
+#define XEN_IA64_DEBUG_ON_RFI (1 << 10)
+
+/* Break on address translation switch. */
+#define XEN_IA64_DEBUG_ON_MMU (1 << 11)
+
+/* Break on bad guest physical address. */
+#define XEN_IA64_DEBUG_ON_BAD_MPA (1 << 12)
+
+/* Force psr.ss bit. */
+#define XEN_IA64_DEBUG_FORCE_SS (1 << 13)
+
+/* Force psr.db bit. */
+#define XEN_IA64_DEBUG_FORCE_DB (1 << 14)
+
+/* Break on ITR/PTR. */
+#define XEN_IA64_DEBUG_ON_TR (1 << 15)
+
+/* Break on ITC/PTC.L/PTC.G/PTC.GA. */
+#define XEN_IA64_DEBUG_ON_TC (1 << 16)
+
+/* Get translation cache. */
+#define XEN_IA64_DEBUG_OP_GET_TC 3
+
+/* Translate virtual address to guest physical address. */
+#define XEN_IA64_DEBUG_OP_TRANSLATE 4
+
+#ifndef __ASSEMBLY__
+/*
+ * Argument block for the ia64_debug_op hypercall.  Which member is
+ * used depends on the sub-command: SET_FLAGS/GET_FLAGS use .flags
+ * (a mask of XEN_IA64_DEBUG_* bits); GET_TC uses .vtlb.
+ */
+union xen_ia64_debug_op {
+    uint64_t flags;
+    struct xen_ia64_debug_vtlb {
+        uint64_t nbr; /* IN/OUT -- presumably the entry count; confirm against tools. */
+        XEN_GUEST_HANDLE_64(ia64_tr_entry_t) tr; /* IN/OUT -- buffer of TR entries. */
+    } vtlb;
+};
+typedef union xen_ia64_debug_op xen_ia64_debug_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_ia64_debug_op_t);
+#endif /* !__ASSEMBLY__ */
+
/* Xencomm macros. */
#define XENCOMM_INLINE_MASK 0xf800000000000000UL
#define XENCOMM_INLINE_FLAG 0x8000000000000000UL